This is an analysis code for a study that assesses the inference of value for unchosen options, given the learned outcomes of chosen options (see design below). The code loads preprocessed data from five MTurk experiments, and analyses their results. Our analysis includes Bayesian regression models, some of which take several hours to run. Accordingly, here we only load the models which we already ran, but if you wish to run the models, you may define the parameter run_models to be equal to 1.
Analysis of Experiment 1
Final Decisions phase
Here we present the tendency to select a rewarded item in the Final Decisions phase as a function of pair type (chosen vs. unchosen pairs). We then present the coefficients of a multilevel Bayesian logistic regression estimating the tendency to select a rewarded item as a function of choice type (chosen_trial_centered) and the difference in normalized liking ratings between rewarded and unrewarded items (norm_drate_by_outcome).
# ================================
# Choices in Final Decisions phase
# ================================
# Per-participant mean probability of choosing the rewarded (gain) item,
# separately for chosen and unchosen pairs.
p_gain <- clean_data_Exp1$final_decisions %>%
  mutate(choice = ifelse(chosen_trial == 1, "Chosen", "Unchosen")) %>%
  group_by(PID, choice) %>%
  dplyr::summarize(p_gain = mean(higher_outcome_chosen, na.rm = TRUE))
# Group-level summary: mean +/- SEM, plus counts of participants showing the
# effect. For chosen pairs the effect is p_gain > 0.5 (value learned from
# feedback); for unchosen pairs it is p_gain < 0.5 (inverse inference) —
# hence the flip of n_effect/percent for the "Chosen" row below.
p_gain_group <- p_gain %>%
  group_by(choice) %>%
  dplyr::summarize(mean = mean(p_gain, na.rm = TRUE),
                   se = sd(p_gain, na.rm = TRUE) / sqrt(n()),
                   n = n(),
                   n_effect = sum(p_gain < 0.5),
                   percent = round(sum(p_gain < 0.5) / n() * 100)) %>%
  mutate(`p(select rewarded)` = sprintf("%.2f \u00b1 %.2f", mean, se),
         n_effect = ifelse(choice == "Chosen", n - n_effect, n_effect),
         `n effect` = sprintf("%d/%d", n_effect, n),
         `% effect` = ifelse(choice == "Chosen", 100 - percent, percent)) %>%
  dplyr::select(choice, `p(select rewarded)`, `n effect`, `% effect`)
# print table of group means and 1 SEM
p_gain_group %>%
  kbl() %>%
  kable_paper("hover", full_width = FALSE, html_font = "Helvetica", position = "left")
|
choice
|
p(select rewarded)
|
n effect
|
% effect
|
|
Chosen
|
0.92 ± 0.01
|
235/235
|
100
|
|
Unchosen
|
0.43 ± 0.01
|
151/235
|
64
|
# ======================================
# Model choices as a function of ratings
# ======================================
# Multilevel Bayesian logistic regression: p(select rewarded) as a function of
# pair type (chosen_trial_centered) and the normalized rating difference
# between rewarded and unrewarded items (norm_drate_by_outcome). The model
# takes hours to run, so by default we load a precomputed coefficient list.
if (run_models == 1) {
  # run model and use function to rearrange the coefficients
  M_Exp1_choice_ratings <- run_choice_ratings_model(
    subset(clean_data_Exp1$final_decisions, !is.na(left_chosen)),
    c(), params, "Exp1")
  coef_list_Exp1 <- create_choice_ratings_coef_list(M_Exp1_choice_ratings, "Exp1", "Pairs")
} else {
  load("../data/Models/Choice_Ratings_models/Coef_lists/coef_list_Exp1.RData")
}
# Flag a coefficient as credible ("*") when its 95% HDI excludes zero
coefs <- coef_list_Exp1$summary_group_coefs %>%
  mutate(sig = ifelse((Median > 0 & low95HDI > 0 & high95HDI > 0) |
                        (Median < 0 & low95HDI < 0 & high95HDI < 0), "*", ""),
         value = sprintf("%.2f [%.2f, %.2f]%s", Median, low95HDI, high95HDI, sig)) %>%
  dplyr::select(coef, value)
# print table of coefs
coefs %>%
  kbl() %>%
  kable_paper("hover", full_width = FALSE, html_font = "Helvetica", position = "left")
|
coef
|
value
|
|
(Intercept)
|
1.49 [1.37, 1.62]*
|
|
chosen_trial_centered
|
1.91 [1.77, 2.05]*
|
|
norm_drate_by_outcome
|
0.32 [0.19, 0.45]*
|
|
chosen_trial_centered:norm_drate_by_outcome
|
-0.03 [-0.16, 0.10]
|
|
Intercept_Chosen_Pairs
|
3.40 [3.18, 3.63]*
|
|
Slope_Chosen_Pairs
|
0.29 [0.08, 0.50]*
|
|
Intercept_Unchosen_Pairs
|
-0.42 [-0.56, -0.27]*
|
|
Slope_Unchosen_Pairs
|
0.35 [0.19, 0.51]*
|
RT analysis in Final Decisions phase
RT predicting choices
In this analysis we assess whether RTs relate to choices in the Final Decisions phase. We follow previous research showing that approach toward reward is faster than avoidance from loss, and so we expect participants to be faster when they choose the more valuable painting. To test this, we run a model predicting the probability to select a rewarded item as a function of pair type (chosen vs. unchosen) and reaction times. We can then rearrange the model coefficients to get an estimate of the effect of RT for chosen and unchosen pairs separately (the slope term).
# ========================================
# RT averages in chosen and unchosen pairs
# ========================================
# Per-participant mean reaction time for each pair type.
RT_FD <- clean_data_Exp1$final_decisions %>%
  mutate(`Pair type` = ifelse(chosen_trial == 1, "Chosen pairs", "Unchosen pairs")) %>%
  group_by(PID, `Pair type`) %>%
  dplyr::summarise(rt = mean(rt, na.rm = TRUE))
# show group means (mean +/- SEM across participants)
RT_FD %>%
  group_by(`Pair type`) %>%
  dplyr::summarise(mean = mean(rt),
                   se = sd(rt) / sqrt(n())) %>%
  mutate(RT = sprintf("%.2f \u00b1 %.2f", mean, se)) %>%
  dplyr::select(`Pair type`, RT) %>%
  kbl() %>%
  kable_paper("hover", full_width = FALSE, html_font = "Helvetica", position = "left")
|
Pair type
|
RT
|
|
Chosen pairs
|
0.80 ± 0.01
|
|
Unchosen pairs
|
0.96 ± 0.01
|
# =================================================
# p(gain) as a function of zscored RT and pair type
# =================================================
# z-score RT within each participant so the RT effect is comparable across
# subjects (zscore is a project helper — behavior assumed analogous to scale();
# TODO confirm)
clean_data_Exp1$final_decisions <- clean_data_Exp1$final_decisions %>%
  group_by(PID) %>%
  mutate(zscored_rt = zscore(rt, na.rm = TRUE))
# run model
if (run_models==1){
  # Multilevel Bayesian logistic regression: p(select rewarded) as a function
  # of pair type, z-scored RT, and their interaction
  M_zscored_RT_FD <- stan_glmer(data=subset(clean_data_Exp1$final_decisions, !is.na(left_chosen)),
                                higher_outcome_chosen ~ chosen_trial_centered*zscored_rt +
                                  (chosen_trial_centered*zscored_rt | PID),
                                family = binomial(link="logit"),
                                adapt_delta = params$adapt_delta,
                                iter = params$iterations,
                                chains = params$chains,
                                warmup = params$warmup)
  # Save the fitted model so the load() branch below stays in sync
  # (consistent with the other model chunks in this script)
  save(M_zscored_RT_FD, file = "../data/Models/RT_final_decisions/M_zscored_RT_FD.RData")
} else {
  load("../data/Models/RT_final_decisions/M_zscored_RT_FD.RData")
}
# compute median and 95% HDIs of coefs.
# chosen_trial_centered is coded +1/-1, so adding/subtracting the main effect
# and the interaction yields per-condition (chosen/unchosen) intercepts and
# RT slopes.
M_bias_zscored_rt <- as.data.frame(M_zscored_RT_FD) %>%
  mutate(intercept_chosen = `(Intercept)` + chosen_trial_centered,
         slope_chosen = zscored_rt + `chosen_trial_centered:zscored_rt`,
         intercept_unchosen = `(Intercept)` - chosen_trial_centered,
         slope_unchosen = zscored_rt - `chosen_trial_centered:zscored_rt`)
# drop participant-level terms; summarize each group-level coefficient with
# its median and 95% HDI, flagging ("*") those whose HDI excludes zero
M_bias_zscored_rt_posterior <- M_bias_zscored_rt[,!str_detect(colnames(M_bias_zscored_rt),"PID")] %>%
  gather(coef,value) %>%
  mutate(coef = factor(coef,
                       levels = c("(Intercept)", "chosen_trial_centered", "zscored_rt", "chosen_trial_centered:zscored_rt",
                                  "intercept_chosen","slope_chosen","intercept_unchosen","slope_unchosen"))) %>%
  group_by(coef) %>%
  dplyr::summarize(HDI95_low = posterior_interval(as.matrix(value), prob=0.95)[1],
                   HDI95_high = posterior_interval(as.matrix(value), prob=0.95)[2],
                   median = median(value)) %>%
  mutate(sig = ifelse((median>0 & HDI95_low>0 & HDI95_high>0) |
                        (median<0 & HDI95_low<0 & HDI95_high<0),"*",""),
         value = sprintf("%.2f [%.2f, %.2f]%s",median, HDI95_low, HDI95_high, sig)) %>%
  dplyr::select(coef, value)
# print table of coefs
print('Model coefficients in a model including zscored RT')
## [1] "Model coefficients in a model including zscored RT"
M_bias_zscored_rt_posterior %>%
  kbl() %>%
  kable_paper("hover", full_width = FALSE, html_font = "Helvetica", position = "left")
|
coef
|
value
|
|
(Intercept)
|
1.48 [1.36, 1.62]*
|
|
chosen_trial_centered
|
1.94 [1.79, 2.09]*
|
|
zscored_rt
|
0.02 [-0.06, 0.09]
|
|
chosen_trial_centered:zscored_rt
|
-0.09 [-0.16, -0.01]*
|
|
intercept_chosen
|
3.42 [3.20, 3.66]*
|
|
slope_chosen
|
-0.07 [-0.21, 0.08]
|
|
intercept_unchosen
|
-0.45 [-0.61, -0.30]*
|
|
slope_unchosen
|
0.10 [0.05, 0.15]*
|
# =========================================
# p(gain) as a function of RT and pair type
# =========================================
# we run the same model with raw RT to make sure the results are not modulated by the normalization of RT.
# run model
if (run_models==1){
  # NOTE(fix): the predictor here must be raw rt, not zscored_rt — the comment
  # above and the coefficient rearrangement below (`rt`,
  # `chosen_trial_centered:rt`) both assume raw RT; with zscored_rt the
  # mutate() below would fail on missing columns.
  M_RT_FD <- stan_glmer(data=subset(clean_data_Exp1$final_decisions, !is.na(left_chosen)),
                        higher_outcome_chosen ~ chosen_trial_centered*rt +
                          (chosen_trial_centered*rt | PID),
                        family = binomial(link="logit"),
                        adapt_delta = params$adapt_delta,
                        iter = params$iterations,
                        chains = params$chains,
                        warmup = params$warmup)
  # Save the fitted model so the load() branch below stays in sync
  save(M_RT_FD, file = "../data/Models/RT_final_decisions/M_RT_FD.RData")
} else {
  load("../data/Models/RT_final_decisions/M_RT_FD.RData")
}
# compute median and 95% HDIs of coefs; rearrange into per-condition
# intercept/slope terms (chosen_trial_centered coded +1/-1)
M_bias_rt <- as.data.frame(M_RT_FD) %>%
  mutate(intercept_chosen = `(Intercept)` + chosen_trial_centered,
         slope_chosen = rt + `chosen_trial_centered:rt`,
         intercept_unchosen = `(Intercept)` - chosen_trial_centered,
         slope_unchosen = rt - `chosen_trial_centered:rt`)
# drop participant-level terms; flag ("*") coefficients whose 95% HDI excludes zero
M_bias_rt_posterior <- M_bias_rt[,!str_detect(colnames(M_bias_rt),"PID")] %>%
  gather(coef,value) %>%
  mutate(coef = factor(coef,
                       levels = c("(Intercept)", "chosen_trial_centered", "rt", "chosen_trial_centered:rt",
                                  "intercept_chosen","slope_chosen","intercept_unchosen","slope_unchosen"))) %>%
  group_by(coef) %>%
  dplyr::summarize(HDI95_low = posterior_interval(as.matrix(value), prob=0.95)[1],
                   HDI95_high = posterior_interval(as.matrix(value), prob=0.95)[2],
                   median = median(value)) %>%
  mutate(sig = ifelse((median>0 & HDI95_low>0 & HDI95_high>0) |
                        (median<0 & HDI95_low<0 & HDI95_high<0),"*",""),
         value = sprintf("%.2f [%.2f, %.2f]%s",median, HDI95_low, HDI95_high, sig)) %>%
  dplyr::select(coef, value)
# print table of coefs
print('Model coefficients in a model including RT (to make sure the resules are not modulated by the normalization of RT)')
## [1] "Model coefficients in a model including RT (to make sure the resules are not modulated by the normalization of RT)"
M_bias_rt_posterior %>%
  kbl() %>%
  kable_paper("hover", full_width = FALSE, html_font = "Helvetica", position = "left")
|
coef
|
value
|
|
(Intercept)
|
1.35 [1.09, 1.62]*
|
|
chosen_trial_centered
|
2.10 [1.85, 2.35]*
|
|
rt
|
0.18 [-0.08, 0.45]
|
|
chosen_trial_centered:rt
|
-0.15 [-0.38, 0.10]
|
|
intercept_chosen
|
3.45 [3.02, 3.88]*
|
|
slope_chosen
|
0.02 [-0.43, 0.53]
|
|
intercept_unchosen
|
-0.74 [-1.02, -0.46]*
|
|
slope_unchosen
|
0.33 [0.17, 0.49]*
|
# ====================================
# plot models for Supplementary Text 1
# ====================================
# We create n fake x values (z-scored RTs) and, for each, predict the y value
# of every draw from the posterior distribution. We do that for every
# condition (chosen vs. unchosen pairs) separately using its rearranged coefs
# (e.g., the chosen intercept and chosen slope).
n <- 1000 # number of x values
x_steps <- seq(-5, 5, length.out = n)
# create predicted draws for chosen and unchosen pairs separately
model_sims <- M_bias_zscored_rt[,c("intercept_chosen", "slope_chosen","intercept_unchosen","slope_unchosen")]
pred_fake_chosen <- as.data.frame(matrix(0,nrow=nrow(model_sims),ncol=n))
pred_fake_unchosen <- as.data.frame(matrix(0,nrow=nrow(model_sims),ncol=n))
for (x in 1:n){
  pred_fake_chosen[,x] <- invlogit(model_sims["intercept_chosen"] + model_sims["slope_chosen"]*x_steps[x])
  pred_fake_unchosen[,x] <- invlogit(model_sims["intercept_unchosen"] + model_sims["slope_unchosen"]*x_steps[x])
}
pred_list <- list(pred_fake_chosen, pred_fake_unchosen)
names_conds <- c("chosen", "unchosen")
# compute summary stats (median and 95% interval) for each x observation
predicted_summary_list <- list()
df_x <- data.frame(obs = 1:n, x = x_steps)
for (c in 1:length(pred_list)){
  # reshape draws (columns = x observations) to long format, then summarize per x
  df_pred <- pred_list[[c]] %>%
    as.data.frame %>%
    setNames(seq_len(ncol(.))) %>%
    tibble::rownames_to_column("posterior_sample") %>%
    tidyr::gather_("obs", "fitted", setdiff(names(.), "posterior_sample")) %>%
    group_by(obs) %>%
    dplyr::summarise(median = median(fitted),
                     lower = quantile(fitted, 0.025),
                     upper = quantile(fitted, 0.975)) %>%
    mutate(obs = as.numeric(obs)) %>%
    left_join(df_x, by="obs") %>% arrange(obs)
  predicted_summary_list[[c]] <- df_pred; names(predicted_summary_list)[c] <- names_conds[c]
}
# line fits: stack chosen and unchosen predictions for plotting
predicted_draws_rt_model <- data.frame(predicted_summary_list[[1]]) %>%
  mutate(choice = "Chosen pairs") %>%
  rbind(mutate(data.frame(predicted_summary_list[[2]]),
               choice = "Unchosen pairs"))
# model text: slope annotations per condition
rt_model_text <- M_bias_zscored_rt_posterior[M_bias_zscored_rt_posterior$coef %in% c("slope_chosen", "slope_unchosen"),] %>%
  mutate(choice = ifelse(coef=="slope_chosen", "Chosen", "Unchosen"),
         x = ifelse(coef=="slope_chosen", -2.5, 2.5),
         y = ifelse(coef=="slope_chosen", 0.8, 0.2),
         text = sprintf("\u03b2(%s Slope):\n%s",choice, value)) %>%
  dplyr::select(-c(coef,value))
# assign fig parameters
linesize <- 1
pointsize <- 5
pointstroke <- 0.55
n_sem <- 1 # how many standard errors do we want the error bars to include?
# RT in chosen and unchosen pairs
# NOTE(fix): geom_point previously referenced undefined point_stroke/point_size;
# the parameters defined above are pointstroke/pointsize.
p1 <- ggplot(RT_FD %>%
               mutate(condition=NaN, choice=ifelse(`Pair type`=="Chosen pairs", "Chosen", "Unchosen")),
             aes(x=choice,y=rt)) +
  stat_summary_bin(aes(y=rt), fun.y="mean", geom="bar", binwidth=0.2,
                   position=position_dodge(width=1), fill="grey") +
  geom_point(aes(color=factor(condition)), position=position_jitterdodge(dodge.width=1, jitter.width=0.1),
             fill="white", shape=21, stroke=pointstroke, size=pointsize) +
  scale_color_manual(values="black") +
  stat_summary(fun.data=mean_se, fun.args = list(mult=n_sem), geom="errorbar", width=0.1, size=1,
               position=position_nudge(0.2), color="black") +
  theme +
  scale_y_continuous(expand=c(0,0), limits=c(0, max(RT_FD$rt + 0.1)), breaks=c(0,0.5,1, 1.5)) +
  theme(legend.position="none",
        aspect.ratio=3/2,
        plot.title = element_text(margin=margin(0,0,30,0))) +
  labs(x = "Pair type",
       y="Reaction times (sec)",
       title="RT in Final Decisions")
# model fit
p2 <- ggplot(predicted_draws_rt_model, aes(y=median,x=x,group=choice)) +
  geom_ribbon(aes(ymin=lower, ymax=upper), fill="#E9E9E9") +
  geom_line(aes(y=median, linetype=choice), colour="black", size=linesize*1.5) +
  geom_hline(yintercept=0.5, size=linesize, linetype="dashed") +
  geom_vline(xintercept=0, size=linesize, linetype="dashed") +
  scale_y_continuous(expand=c(0,0), breaks=c(0,0.5,1), limits=c(0,1.025)) +
  scale_x_continuous(expand=c(0,0)) +
  scale_linetype_manual(values=c("longdash", "solid")) +
  theme + point_plot_theme +
  theme(legend.position="none",
        plot.title = element_text(margin=margin(0,0,30,0))) +
  geom_text(rt_model_text,mapping=aes(x=x, y=y, group=choice, label=text), size=7) +
  labs(y="Predicted p(select S+)",
       x="Reaction times (z-scored)",
       title="Choices and RTs in Final Decisions")
# combine panels and (optionally) save to disk
p <- plot_grid(p1,p2,
               ncol=2,
               axis="bt",
               align="v",
               labels=c("a","b"),
               label_size = 30,
               label_fontfamily = "Helvetica",
               rel_widths = c(0.8,1))
if (Save_plots == 1) {ggsave(filename=sprintf("../results/Plots/%s.%s","Figure_RT",fig_type),
                             plot=p,
                             width=fig_size[1]+4,
                             height=fig_size[2]-2)}
Outcome Estimation phase
Here we present the tendency to estimate items as rewarded ones as a function of choice (chosen vs. unchosen items) and actual given reward (rewarded vs. unrewarded; for unchosen items, this is the outcome of their chosen counterpart).
# =======================================
# Estimations in Outcome Estimation phase
# =======================================
# Per-participant probability of estimating an item as rewarded, split by
# choice (chosen vs. unchosen) and reward (for unchosen items, the outcome of
# their chosen counterpart); also per-cell accuracy and RT.
outcome_estimation <- clean_data_Exp1$outcome_evaluation %>%
  mutate(choice = ifelse(chosen_obj==1, "Chosen", "Unchosen"),
         reward = ifelse(reward_type==1, "Rewarded", "Unrewarded")) %>%
  group_by(PID, choice, reward) %>%
  dplyr::summarise(gain_eval = mean(outcome_eval_gain, na.rm = TRUE),
                   eval_acc = mean(outcome_eval_acc, na.rm = TRUE),
                   eval_rt = mean(outcome_eval_rt, na.rm = TRUE))
# Group-level summary. "Effect" counts participants below 0.5, flipped to
# above 0.5 for the chosen-rewarded and unchosen-unrewarded cells (where the
# expected effect is a high p(estimate as rewarded)).
outcome_est_group <- outcome_estimation %>%
  #subset(inverse_strategy==0) %>%
  group_by(choice, reward) %>%
  dplyr::summarise(mean = mean(gain_eval, na.rm = TRUE),
                   se = sd(gain_eval, na.rm = TRUE)/sqrt(n()),
                   n = n(),
                   n_effect = sum(gain_eval < 0.5),
                   percent = round(sum(gain_eval < 0.5)/n()*100)) %>%
  mutate(`p(estimate as rewarded)` = sprintf("%.2f \u00b1 %.2f",mean, se),
         n_effect = ifelse((reward=="Rewarded" & choice=="Chosen") | (reward=="Unrewarded" & choice=="Unchosen"),
                           n-n_effect, n_effect),
         `n effect` = sprintf("%d/%d", n_effect, n),
         `% effect` = n_effect/n*100) %>%
  dplyr::select(choice,reward,`p(estimate as rewarded)`,`n effect`,`% effect`)
# print table
outcome_est_group %>%
  kbl() %>%
  kable_paper("hover", full_width = FALSE, html_font = "Helvetica", position = "left")
|
choice
|
reward
|
p(estimate as rewarded)
|
n effect
|
% effect
|
|
Chosen
|
Rewarded
|
0.95 ± 0.01
|
233/235
|
99.14894
|
|
Chosen
|
Unrewarded
|
0.14 ± 0.01
|
208/235
|
88.51064
|
|
Unchosen
|
Rewarded
|
0.39 ± 0.02
|
139/235
|
59.14894
|
|
Unchosen
|
Unrewarded
|
0.54 ± 0.02
|
155/235
|
65.95745
|
# ===============================================
# Model estimations as a function of outcome type
# ===============================================
# Multilevel Bayesian logistic regression of p(estimate as rewarded) on choice
# (chosen_obj_centered, coded +1/-1) by reward type. Loaded from disk unless
# run_models == 1.
if (run_models==1){
  clean_data_Exp1$outcome_evaluation <- mutate(clean_data_Exp1$outcome_evaluation,
                                               chosen_obj_centered = ifelse(chosen_obj==0,-1,1))
  M_outcome_estimation <- stan_glmer(data = clean_data_Exp1$outcome_evaluation,
                                     outcome_eval_gain ~ chosen_obj_centered * reward_type +
                                       (chosen_obj_centered * reward_type | PID),
                                     family = binomial(link="logit"),
                                     adapt_delta = params$adapt_delta,
                                     iter = params$iterations,
                                     chains = params$chains,
                                     warmup = params$warmup)
  save(M_outcome_estimation, file = "../data/Models/Outcome_estimation/M_outcome_estimation.RData")
} else {
  load("../data/Models/Outcome_estimation/M_outcome_estimation.RData")
}
# Rearrange model coefficients to get per-cell terms (choice x reward): with
# +1/-1 coding, summing/subtracting the main effects and the interaction
# yields the linear predictor of each cell. Participant-level terms are
# dropped before summarizing (median and 95% HDI per coefficient).
model_posterior <- as.data.frame(M_outcome_estimation)
outcome_est_group_fits <- model_posterior[,!str_detect(colnames(model_posterior),"PID")] %>%
  mutate(gain_chosen = `(Intercept)` + chosen_obj_centered + reward_type + `chosen_obj_centered:reward_type`,
         no_gain_chosen = `(Intercept)` + chosen_obj_centered - reward_type - `chosen_obj_centered:reward_type`,
         gain_unchosen = `(Intercept)` - chosen_obj_centered + reward_type - `chosen_obj_centered:reward_type`,
         no_gain_unchosen = `(Intercept)` - chosen_obj_centered - reward_type + `chosen_obj_centered:reward_type`) %>%
  gather(coef,value) %>%
  group_by(coef) %>%
  dplyr::summarize(HDI95_low = posterior_interval(as.matrix(value), prob=0.95)[1],
                   HDI95_high = posterior_interval(as.matrix(value), prob=0.95)[2],
                   median = median(value)) %>%
  mutate(value = sprintf("%.2f [%.2f, %.2f]",median, HDI95_low, HDI95_high)) %>%
  dplyr::select(coef, value)
outcome_est_group_fits %>%
  kbl() %>%
  kable_paper("hover", full_width = FALSE, html_font = "Helvetica", position = "left")
|
coef
|
value
|
|
(Intercept)
|
0.19 [0.04, 0.35]
|
|
chosen_obj_centered
|
0.36 [0.22, 0.50]
|
|
chosen_obj_centered:reward_type
|
1.80 [1.62, 2.00]
|
|
gain_chosen
|
3.74 [3.30, 4.27]
|
|
gain_unchosen
|
-0.57 [-0.75, -0.39]
|
|
no_gain_chosen
|
-2.64 [-3.04, -2.30]
|
|
no_gain_unchosen
|
0.24 [0.03, 0.45]
|
|
reward_type
|
1.40 [1.23, 1.58]
|
# ==========================================================================
# Relationship between inverse decision bias and inverse estimation of value
# ==========================================================================
# reward_diff = p(estimate rewarded item as rewarded) - p(estimate unrewarded
# item as rewarded), merged with each participant's Final Decisions p_gain.
inverse_decision_estimation <- outcome_estimation %>%
  dplyr::select(-c(eval_acc, eval_rt)) %>%
  spread(reward, gain_eval) %>%
  mutate(reward_diff = Rewarded-Unrewarded) %>%
  merge(p_gain, by=c("PID","choice")) %>%
  mutate(choice_centered = ifelse(choice=="Chosen", 1, -1),
         p_gain_centered = p_gain - 0.5)
# ggplot(inverse_decision_estimation, aes(y=p_gain, x=reward_diff)) +
#   geom_point() +
#   stat_smooth(method=lm) +
#   facet_wrap(.~choice) +
#   theme + point_plot_theme +
#   geom_hline(yintercept=0, size=1, linetype="dashed") +
#   geom_vline(xintercept=0, size=1, linetype="dashed")
# Bayesian linear regression of the centered decision bias on choice type and
# the estimation difference score (loaded from disk unless run_models == 1)
if (run_models==1){
  M_inverse_decision_estimation <- stan_glm(data = inverse_decision_estimation,
                                            p_gain_centered ~ choice_centered * reward_diff,
                                            family = gaussian(),
                                            adapt_delta = params$adapt_delta,
                                            iter = params$iterations,
                                            chains = params$chains,
                                            warmup = params$warmup)
  save(M_inverse_decision_estimation, file = "../data/Models/Outcome_estimation/M_inverse_decision_estimation.RData")
} else {
  load("../data/Models/Outcome_estimation/M_inverse_decision_estimation.RData")
}
# Rearrange posterior draws into per-condition intercept/slope terms
# (choice_centered coded +1/-1) and summarize with median and 95% HDI
inverse_group_fits <- as.data.frame(M_inverse_decision_estimation) %>%
  mutate(intercept_chosen = `(Intercept)` + choice_centered,
         slope_chosen = reward_diff + `choice_centered:reward_diff`,
         intercept_unchosen = `(Intercept)` - choice_centered,
         slope_unchosen = reward_diff - `choice_centered:reward_diff`) %>%
  gather(coef,value) %>%
  group_by(coef) %>%
  dplyr::summarize(HDI95_low = posterior_interval(as.matrix(value), prob=0.95)[1],
                   HDI95_high = posterior_interval(as.matrix(value), prob=0.95)[2],
                   median = median(value)) %>%
  mutate(value = sprintf("%.2f [%.2f, %.2f]",median, HDI95_low, HDI95_high)) %>%
  dplyr::select(coef, value)
inverse_group_fits %>%
  kbl() %>%
  kable_paper("hover", full_width = FALSE, html_font = "Helvetica", position = "left")
|
coef
|
value
|
|
(Intercept)
|
0.07 [0.05, 0.10]
|
|
choice_centered
|
0.09 [0.07, 0.11]
|
|
choice_centered:reward_diff
|
-0.02 [-0.06, 0.01]
|
|
intercept_chosen
|
0.16 [0.12, 0.21]
|
|
intercept_unchosen
|
-0.02 [-0.03, -0.00]
|
|
reward_diff
|
0.34 [0.31, 0.37]
|
|
sigma
|
0.12 [0.11, 0.13]
|
|
slope_chosen
|
0.32 [0.26, 0.37]
|
|
slope_unchosen
|
0.37 [0.33, 0.40]
|
Figure 2
linesize <- 1
pointsize <- 5
pointstroke <- 0.55
n_sem <- 1 # how many standard errors do we want the error bars to include?
# =================================
# Panel a: means for p(choose gain)
# =================================
# Per-participant p(select rewarded) by pair type. `condition` is a constant
# dummy so the jittered points get a single manual colour.
bias <- clean_data_Exp1$final_decisions %>%
  group_by(PID, chosen_trial) %>%
  dplyr::summarize(p_gain = mean(higher_outcome_chosen, na.rm = TRUE), condition=NaN)
p1 <- ggplot(bias, aes(x=factor(chosen_trial),y=p_gain)) +
  stat_summary_bin(aes(y=p_gain), fun.y="mean", geom="bar", binwidth=0.2,
                   position=position_dodge(width=1), fill="grey") +
  geom_point(aes(color=factor(condition)), position=position_jitterdodge(dodge.width=1, jitter.width=0.1),
             fill="white", shape=21, stroke=pointstroke, size=pointsize) +
  scale_color_manual(values="black") +
  stat_summary(fun.data=mean_se, fun.args = list(mult=n_sem), geom="errorbar", width=0.1, size=0.9,
               position=position_nudge(0.2), color="black") +
  geom_hline(yintercept=0.5, size=linesize, linetype="dashed") +
  scale_y_continuous(expand=c(0,0), breaks=c(0,0.5,1), limits=c(0,1.02)) +
  theme +
  theme(axis.title.x=element_blank(),
        legend.position="none",
        aspect.ratio=2.5/2,
        plot.title = element_text(margin=margin(0,0,30,0))) +
  labs(y="p(select S+)", title="Final Decisions Choices") +
  scale_x_discrete(breaks = c("1","0"), limits=c("1","0"),
                   labels = c("1" = expression(S[chosen]*" (learned)"),
                              "0" = expression(S[unchosen]*" (inferred)")))
# ==========================
# Panel b: choice model fits
# ==========================
# We create 100 fake x values (delta ratings), and for each we predict the y values of every draw from the posterior distributions. We do that for every condition (chosen vs. unchosen pairs) separately using their rearranged coefs (e.g., the chosen intercept and chosen slope)
n <- 100 # number x values
fake_x <- seq(-2,2,length.out=n)
predicted_group_draws_list <- list()
# one predicted-draws matrix (posterior draws x fake x values) per condition
for (c in 1:ncol(coef_list_Exp1$coefs_signs)){ # go over each condition (e.g., chosen and unchosen pairs)
pred_fake <- as.data.frame(matrix(0,nrow=nrow(coef_list_Exp1$posterior_group_coefs),ncol=n))
for (x in 1:n){ # run over each x observation
# find the relevant column of the intercept and slope terms for the current condition
intercept_col <- which(grepl(paste0("Intercept_",colnames(coef_list_Exp1$coefs_signs)[c]),
colnames(coef_list_Exp1$posterior_group_coefs)))
slope_col <- which(grepl(paste0("Slope_",colnames(coef_list_Exp1$coefs_signs)[c]),
colnames(coef_list_Exp1$posterior_group_coefs)))
# predict the probability for a gain response for each posterior iteration in current x value
pred_fake[,x] <- invlogit(coef_list_Exp1$posterior_group_coefs[,intercept_col] +
coef_list_Exp1$posterior_group_coefs[,slope_col]*fake_x[x])
}
predicted_group_draws_list[[c]] <- pred_fake
names(predicted_group_draws_list)[c] <- colnames(coef_list_Exp1$coefs_signs)[c]
}
# now we can compute the median and 95% HDI for every x value
predicted_group_summary_list <- list()
df_x <- data.frame(obs = 1:n, x = fake_x)
for (c in 1:ncol(coef_list_Exp1$coefs_signs)){
# reshape draws (columns = x observations) to long format, then summarize per x
df_pred <- predicted_group_draws_list[[c]] %>%
setNames(seq_len(ncol(.))) %>%
tibble::rownames_to_column("posterior_sample") %>%
tidyr::gather_("obs", "fitted", setdiff(names(.), "posterior_sample")) %>%
group_by(obs) %>%
dplyr::summarise(median = median(fitted),
lower = quantile(fitted, 0.025),
upper = quantile(fitted, 0.975)) %>%
mutate(obs = as.numeric(obs)) %>%
left_join(df_x, by="obs") %>% arrange(obs)
predicted_group_summary_list[[c]] <- df_pred
names(predicted_group_summary_list)[c] <- names(predicted_group_draws_list)[c]
}
# line fits: stack chosen and unchosen predictions for plotting
predicted_draws_choice_model <- data.frame(predicted_group_summary_list$Chosen_Pairs) %>%
mutate(choice = "Chosen pairs") %>%
rbind(mutate(data.frame(predicted_group_summary_list$Unchosen_Pairs),
choice = "Unchosen pairs"))
# model text: per-condition intercept/slope annotations, "*" when the 95% HDI excludes zero
choice_model_text <- subset(coef_list_Exp1$summary_group_coefs,
grepl("Intercept_",coef) | grepl("Slope_",coef)) %>%
separate(coef, c("coef","choice","condition"), "_") %>%
mutate(sig = ifelse((Median>0 & low95HDI>0 & high95HDI>0) | (Median<0 & low95HDI<0 & high95HDI<0),"*",""),
median = sprintf("%.2f%s",Median, sig),
text = sprintf("\u03b2(%s %s):\n%.2f [%.2f, %.2f]%s",choice, coef, Median, low95HDI, high95HDI, sig)) %>%
mutate(x = ifelse(choice=="Chosen", -0.5, 0.5),
y = ifelse(choice=="Chosen", 0.8, 0.2))
# plot fit
p2 <- ggplot(predicted_draws_choice_model, aes(y=median,x=x,group=choice)) +
geom_ribbon(aes(ymin=lower, ymax=upper), fill="#E9E9E9") +
geom_line(aes(y=median, linetype=choice), colour="black", size=linesize*1.5) +
geom_hline(yintercept=0.5, size=linesize, linetype="dashed") +
geom_vline(xintercept=0, size=linesize, linetype="dashed") +
scale_y_continuous(expand=c(0,0), breaks=c(0,0.5,1), limits=c(0,1.025)) +
scale_x_continuous(expand=c(0,0)) +
scale_linetype_manual(values=c("longdash", "solid")) +
theme + point_plot_theme +
theme(legend.position="none",
plot.title = element_text(margin=margin(0,0,30,0))) +
geom_text(subset(choice_model_text, coef=="Intercept"),
mapping=aes(x=x, y=y, group=choice, label=text), size=7) +
labs(y="Predicted p(select S+)",
x="Normalized \u0394ratings (S+ - S0)",
title="Choice model predictions")
# =================================
# Panel c: Outcome estimation means
# =================================
# Relabel reward levels for plotting: S+ = rewarded, S- = unrewarded
estimates <- outcome_estimation %>%
mutate(reward = ifelse(reward=="Rewarded", "S+", "S-"))
# Bars = cell means, error bars = 1 SEM, points = individual participants
p3 <- ggplot(estimates, aes(y = gain_eval, x = choice)) +
stat_summary_bin(aes(y=gain_eval, fill=reward), fun.y="mean", geom="bar", binwidth=0.2,
position=position_dodge(width=1)) +
stat_summary(aes(color=reward, x=choice),
fun.data=mean_se, fun.args = list(mult=1),
geom="errorbar", width=0.2, size=0.9, position=position_dodge(width=1)) +
# geom_point(aes(color=reward, x=choice),
# position=position_jitterdodge(dodge.width=1, jitter.width=0.1, jitter.height=0.01),
# shape=21, stroke=pointstroke, size=1.4, fill="white", alpha=0.95) +
geom_point(aes(color=reward, x=choice),
position=position_jitterdodge(dodge.width=1, jitter.width=0.1, jitter.height=0.01),
shape=21, stroke=pointstroke, size=2, fill="white") +
geom_hline(yintercept=0.5, linetype="dashed", size=linesize) +
scale_y_continuous(expand=c(0,0), breaks=c(0,0.5,1), limits=c(-0.03,1.05)) +
theme +
labs(y="p(estimate as S+)", title="Outcome Estimation") +
theme(
legend.position = c(.82, .95),
legend.justification = c("right", "top"),
legend.box.just = "right",
legend.margin = margin(6, 6, 6, 6),
legend.text=element_text(size=18),
axis.title.x=element_blank(),
aspect.ratio=2.2/2,
plot.title = element_text(margin=margin(0,0,30,0))) +
#scale_color_manual(values=c("#B34E00", "#36875F")) +
scale_color_manual(values=c("#9E4908", "#246B47")) +
scale_fill_manual(values=c("#D55E00","#54B082")) +
scale_x_discrete(breaks = c("Chosen", "Unchosen"),
labels = c(expression(S[chosen]), expression(S[unchosen])))
# ===================================================================
# Panel d: Relationship between decision bias and outcome estimations
# ===================================================================
# create 95% HDI ribbon for model fits: rearrange posterior draws into
# per-condition intercept/slope terms (choice_centered coded +1/-1)
inverse_group_fits <- as.data.frame(M_inverse_decision_estimation) %>%
  mutate(intercept_chosen = `(Intercept)` + choice_centered,
         slope_chosen = reward_diff + `choice_centered:reward_diff`,
         intercept_unchosen = `(Intercept)` - choice_centered,
         slope_unchosen = reward_diff - `choice_centered:reward_diff`)
# predict bias for plotting
n <- 100 # number x values
# create new data for chosen and unchosen pairs separately, spanning each
# condition's observed range of reward_diff
conds <- c(1,-1); new_data_list <- list()
for (i in 1:length(conds)){
  p_gain_data_cond <- inverse_decision_estimation[inverse_decision_estimation[,"choice_centered"]==conds[i],"reward_diff"]
  x_steps <- seq(min(p_gain_data_cond,na.rm=TRUE), max(p_gain_data_cond,na.rm=TRUE), length.out = n)
  new_data_list[[i]] <- data.frame(obs = seq_along(x_steps), x1=conds[i], x2=x_steps)
  colnames(new_data_list[[i]])[c(2,3)] <- c("choice_centered","reward_diff")
  names(new_data_list)[i] <- sprintf("cond%d",conds[i])
}
# predict y values given the new data
pred_list <- list()
for (i in 1:length(new_data_list)){
  pred_list[[i]] <- posterior_linpred(M_inverse_decision_estimation, newdata = new_data_list[[i]])
  names(pred_list)[i] <- names(new_data_list)[i]
}
# compute summary stats (median and 95% interval) for each x observation
predicted_summary_list <- list()
for (c in 1:length(new_data_list)){
  df_pred <- pred_list[[c]] %>%
    as.data.frame %>%
    setNames(seq_len(ncol(.))) %>%
    tibble::rownames_to_column("posterior_sample") %>%
    tidyr::gather_("obs", "fitted", setdiff(names(.), "posterior_sample")) %>%
    group_by(obs) %>%
    dplyr::summarise(median = median(fitted),
                     lower = quantile(fitted, 0.025),
                     upper = quantile(fitted, 0.975)) %>%
    mutate(obs = as.numeric(obs)) %>%
    left_join(new_data_list[[c]], by="obs") %>% arrange(obs)
  predicted_summary_list[[c]] <- df_pred; names(predicted_summary_list)[c] <- names(new_data_list)[c]
}
predicted_summary_bias_estimations <- rbind(predicted_summary_list[[1]],predicted_summary_list[[2]]) %>%
  mutate(p_gain_centered = median,
         choice = ifelse(choice_centered == 1, "Chosen", "Unchosen"))
# slope text annotations, "*" when the 95% HDI excludes zero
inverse_decision_estimation_text <- as.data.frame(M_inverse_decision_estimation) %>%
  mutate(Slope_Unchosen = reward_diff - `choice_centered:reward_diff`,
         Slope_Chosen = reward_diff + `choice_centered:reward_diff`) %>%
  dplyr::select(Slope_Chosen,Slope_Unchosen) %>%
  gather(coef,value) %>%
  separate(coef, c("coef","choice"), "_") %>%
  group_by(coef, choice) %>%
  dplyr::summarize(HDI95_low = posterior_interval(as.matrix(value), prob=0.95)[1],
                   HDI95_high = posterior_interval(as.matrix(value), prob=0.95)[2],
                   median = median(value)) %>%
  mutate(sig = ifelse((median>0 & HDI95_low>0 & HDI95_high>0) | (median<0 & HDI95_low<0 & HDI95_high<0),"*",""),
         text = sprintf("\u03b2(%s %s):\n%.2f [%.2f, %.2f]%s",coef, choice, median, HDI95_low, HDI95_high,sig)) %>%
  mutate(x = ifelse(choice=="Chosen", 0, -0.5),
         y = ifelse(choice=="Chosen", -0.2, 0.35))
# Plot (unchosen pairs only).
# NOTE(fix): the plot previously referenced undefined point_size/point_stroke/
# line_size; the parameters defined at the top of this figure script are
# pointsize/pointstroke/linesize.
p4 <- ggplot(subset(inverse_decision_estimation, choice=="Unchosen"),
             aes(y=p_gain_centered,x=reward_diff)) +
  geom_point(size=pointsize, fill="white", shape=21, stroke=pointstroke) +
  theme +
  point_plot_theme +
  geom_ribbon(data = subset(predicted_summary_bias_estimations,choice=="Unchosen"), aes(ymin=lower, ymax=upper), fill="#E9E9E9")+
  geom_line(data = subset(predicted_summary_bias_estimations,choice=="Unchosen"), aes(y=median), size=linesize*1.5) +
  geom_hline(yintercept=0, size=linesize, linetype="dashed") +
  geom_vline(xintercept=0, size=linesize, linetype="dashed") +
  scale_y_continuous(expand=c(0,0), breaks=c(-0.5,0,0.5), limits=c(-0.55,0.55)) +
  scale_x_continuous(expand=c(0,0), breaks=c(-1, 0, 1), limits=c(-1.05, 1.05)) +
  theme(legend.position="none", plot.title = element_text(margin=margin(0,0,30,0))) +
  geom_text(subset(inverse_decision_estimation_text, coef=="Slope" & choice=="Unchosen"),
            size=7,
            mapping=aes(x=x, y=y, label=text)) +
  labs(y="p(select S+) - 0.5",
       x="Inverse estimation score",
       title="Decision bias and outcome estimations")
# Assemble Figure 2 from the four panels and (optionally) save it to disk.
# Save_plots, fig_type and fig_size are defined earlier in the script.
p <- plot_grid(p1,p2,p3,p4,
ncol=2,nrow=2,
axis="bt",
align="v",
labels=c("a","b","c","d"),
label_size = 30,
label_fontfamily = "Helvetica")
if (Save_plots == 1) {ggsave(filename=sprintf("../results/Plots/%s.%s","Figure2",fig_type),
plot=p,
width=fig_size[1]+4,
height=fig_size[2]+4)}
Relationship between associative memory and inverse bias
Here we present regression models assessing the relationship between inverse inference of value and associative memory of the deliberation pairs. We analyse these effects both across participants and within participants.
# =============================
# Between participants analysis
# =============================
# Compute measures of interest
# Per participant: p(select rewarded) separately for Chosen/Unchosen pairs plus
# mean pair-memory accuracy; inverse_bias = p_gain(Chosen) - p_gain(Unchosen).
# NOTE(review): spread() keys rows on both PID and pair_acc, so this assumes
# pair_acc is identical across a participant's Chosen and Unchosen rows
# (otherwise the two choice levels land on different rows and inverse_bias
# becomes NA) -- confirm pair_acc is a per-participant constant here.
memory_bias <- clean_data_Exp1$final_decisions %>%
mutate(choice = ifelse(chosen_trial==1, "Chosen", "Unchosen")) %>%
group_by(PID, choice) %>%
dplyr::summarize(p_gain = mean(higher_outcome_chosen, na.rm=1),
pair_acc = mean(pair_acc, na.rm=1)) %>%
spread(choice,p_gain) %>%
mutate(inverse_bias = Chosen - Unchosen)
# Model inverse decision bias and pairs memory
# Either fit the between-participants bias ~ memory regression or load the
# saved fit; both branches leave M_pair_acc_bias_Exp1 (and the coef list) in scope.
if (run_models==1){
coefs_pair_acc_bias_Exp1 <- run_bias_memory_model(subset(clean_data_Exp1$final_decisions, !is.na(left_chosen)),
"pair_acc",c(),params,"Exp1")
load("../data/Models/Memory_Bias/Between_subs/Model_objects/M_pair_acc_bias_Exp1.RData")
} else {
load("../data/Models/Memory_Bias/Between_subs/Model_objects/M_pair_acc_bias_Exp1.RData")
load("../data/Models/Memory_Bias/Between_subs/Coef_lists/coefs_pair_acc_bias_Exp1.RData")
}
# Present model coefs
# Summarize each posterior draw column as median [95% posterior interval].
pairs_acc_model <- as.data.frame(M_pair_acc_bias_Exp1) %>%
gather(coef, value, `(Intercept)`:sigma) %>%
group_by(coef) %>%
dplyr::summarize(HDI95_low = posterior_interval(as.matrix(value), prob=0.95)[1],
HDI95_high = posterior_interval(as.matrix(value), prob=0.95)[2],
median = median(value)) %>%
mutate(value = sprintf("%.2f [%.2f, %.2f]",median, HDI95_low, HDI95_high)) %>%
dplyr::select(coef, value)
print("between-participants analysis")
## [1] "between-participants analysis"
pairs_acc_model %>%
kbl() %>%
kable_paper("hover", full_width = F, html_font = "Helvetica", position = "left")
|
coef
|
value
|
|
(Intercept)
|
0.06 [-0.08, 0.20]
|
|
pair_acc
|
0.67 [0.47, 0.88]
|
|
sigma
|
0.23 [0.21, 0.25]
|
# =============================
# Within participants analysis
# =============================
# For every participant, categorize their deliberation pairs into two groups according to outcome estimations
subs <- unique(clean_data_Exp1$deliberation$PID)
# accumulators for the per-participant annotated tables (bound together below)
deliberation <- c(); memory <- c(); outcome_evaluation <- c()
for (s in subs){
curr_outcome_eval <- subset(clean_data_Exp1$outcome_evaluation, PID==s)
# NOTE(review): curr_final_decisions is extracted but never used in this loop
curr_final_decisions <- subset(clean_data_Exp1$final_decisions, PID==s)
curr_deliberation <- subset(clean_data_Exp1$deliberation, PID==s)
curr_memory <- subset(clean_data_Exp1$memory, PID==s)
# insert outcome evaluation to the deliberation matrix, and add a measure of whether this is a direct or inverse pair (whether the outcomes of chosen and unchosen items within a pair are estimated to be the same or not)
for (t in 1:nrow(curr_deliberation)){
# look up this participant's gain estimation for the chosen and unchosen item of pair t
curr_deliberation$gain_eval_chosen[t] <-
curr_outcome_eval$outcome_eval_gain[curr_outcome_eval$stimulus_id==curr_deliberation$chosen_obj[t]]
curr_deliberation$gain_eval_unchosen[t] <-
curr_outcome_eval$outcome_eval_gain[curr_outcome_eval$stimulus_id==curr_deliberation$unchosen_obj[t]]
# same estimated outcome for both items -> direct transfer; different -> inverse transfer
curr_deliberation$pair_grouping[t] <-
ifelse(curr_deliberation$gain_eval_chosen[t]==curr_deliberation$gain_eval_unchosen[t],
"Direct transfer",
"Inverse transfer")
}
# use deliberation info to assign memory pairs
# a memory stimulus matches the deliberation pair it appeared in on either side
for (t in 1:nrow(curr_memory)){
curr_memory$pair_type_left[t] <-
curr_deliberation$pair_grouping[curr_deliberation$stimulus_left==curr_memory$stimulus_left[t] |
curr_deliberation$stimulus_right==curr_memory$stimulus_left[t]]
curr_memory$pair_type_right[t] <-
curr_deliberation$pair_grouping[curr_deliberation$stimulus_left==curr_memory$stimulus_right[t] |
curr_deliberation$stimulus_right==curr_memory$stimulus_right[t]]
# label the memory pair by the deliberation groups of its two items:
# mixed -> "Direct/Inverse"; otherwise the shared group label
curr_memory$pair_type_cond[t] <- ifelse(curr_memory$pair_type_left[t]!=curr_memory$pair_type_right[t],
"Direct/Inverse",
ifelse(curr_memory$pair_type_left[t]=="Inverse transfer",
"Inverse transfer",
"Direct transfer"))
}
# bind subjects mats
outcome_evaluation <- bind_rows(outcome_evaluation,curr_outcome_eval)
memory <- bind_rows(memory,curr_memory)
deliberation <- bind_rows(deliberation, curr_deliberation)
}
# Restrict to old (previously seen) pairs: only these necessarily carry either
# a direct- or an inverse-transfer label from the deliberation phase.
memory_per_deliberation <- memory %>%
  dplyr::filter(old_pair == 1) %>%
  mutate(pair_type_centered = ifelse(pair_type_cond == "Inverse transfer", 1, -1))
# Per-participant memory accuracy within each deliberation-pair group
memory_per_deliberation_subs <- memory_per_deliberation %>%
  group_by(PID, pair_type_cond) %>%
  dplyr::summarise(pair_acc = mean(pair_acc, na.rm = TRUE))
# Group-level means and standard errors, formatted for display
memory_per_deliberation_means <- memory_per_deliberation_subs %>%
  group_by(pair_type_cond) %>%
  dplyr::summarize(mean = mean(pair_acc),
                   se = sd(pair_acc) / sqrt(n())) %>%
  mutate(`pair memory accuracy` = sprintf("%.2f \u00b1 %.2f", mean, se)) %>%
  dplyr::rename(`condition type` = pair_type_cond) %>%
  dplyr::select(`condition type`, `pair memory accuracy`)
print("within-participants analysis")
## [1] "within-participants analysis"
memory_per_deliberation_means %>%
  kbl() %>%
  kable_paper("hover", full_width = FALSE, html_font = "Helvetica", position = "left")
|
condition type
|
pair memory accuracy
|
|
Direct transfer
|
0.68 ± 0.02
|
|
Inverse transfer
|
0.75 ± 0.01
|
# Model the effect
# Multilevel logistic regression of pair-memory accuracy on deliberation-pair
# type (Inverse transfer = +1, Direct transfer = -1), with per-participant
# random intercepts and slopes.
if (run_models==1){
M_memory_per_deliberation_Exp1 <- stan_glmer(data = memory_per_deliberation,
pair_acc ~ pair_type_centered + (pair_type_centered | PID),
family = binomial(link="logit"),
adapt_delta = params$adapt_delta,
iter = params$iterations,
chains = params$chains,
warmup = params$warmup)
save(M_memory_per_deliberation_Exp1,
file = "../data/Models/Memory_Bias/Within_subs/M_memory_per_deliberation_Exp1.RData")
} else {
load("../data/Models/Memory_Bias/Within_subs/M_memory_per_deliberation_Exp1.RData")
}
# Present model coefs
# Fixed effects only, displayed as median [95% posterior interval].
memory_pair_type_model <- as.data.frame(M_memory_per_deliberation_Exp1) %>%
gather(coef, value, `(Intercept)`:pair_type_centered) %>%
group_by(coef) %>%
dplyr::summarize(HDI95_low = posterior_interval(as.matrix(value), prob=0.95)[1],
HDI95_high = posterior_interval(as.matrix(value), prob=0.95)[2],
median = median(value)) %>%
mutate(value = sprintf("%.2f [%.2f, %.2f]",median, HDI95_low, HDI95_high)) %>%
dplyr::select(coef,value)
memory_pair_type_model %>%
kbl() %>%
kable_paper("hover", full_width = F, html_font = "Helvetica", position = "left")
|
coef
|
value
|
|
(Intercept)
|
1.03 [0.91, 1.16]
|
|
pair_type_centered
|
0.15 [0.05, 0.25]
|
Figure 3 - memory and bias
# inverse decision bias as a function of pairs memory
# Panel (a) of Figure 3. The annotation text is the posterior slope of the
# between-participants memory -> bias regression, shown as median [95% CI]
# with * when the interval excludes zero.
memory_model_text <- coefs_pair_acc_bias_Exp1$posterior_draws_per_cond$no_cond %>%
gather(coef, value) %>%
group_by(coef) %>%
dplyr::summarize(Median=median(value),
low95=quantile(value, 0.025),
high95=quantile(value, 0.975)) %>%
mutate(sig = ifelse((Median>0 & low95>0 & high95>0) | (Median<0 & low95<0 & high95<0),"*",""),
x = 0.25,
y = -0.5,
text = sprintf("\u03b2(Memory):\n%.2f [%.2f, %.2f]%s",Median, low95, high95, sig)) %>%
subset(coef=="Slope")
# scatter of per-participant bias vs. memory with the model's predicted fit
p1 <- ggplot(coefs_pair_acc_bias_Exp1$bias_memory_data, aes(y=bias_diff,x=pair_acc)) +
geom_point(size=point_size, fill="white", shape=21, stroke=point_stroke) +
theme +
point_plot_theme +
geom_ribbon(data = mutate(coefs_pair_acc_bias_Exp1$predicted_summary_list[[1]], bias_diff=median),
aes(ymin=lower, ymax=upper), fill="#DFEBE9") +
geom_line(aes(y=median), data=coefs_pair_acc_bias_Exp1$predicted_summary_list[[1]],
colour="turquoise4", size=line_size*1.5) +
geom_hline(yintercept=0, size=line_size, linetype="dashed") +
geom_vline(xintercept=0.5, size=line_size, linetype="dashed") +
scale_y_continuous(expand=c(0,0), breaks=c(-1,0,1), limits=c(-1.025,1.025)) +
scale_x_continuous(expand=c(0,0), breaks=c(0, 0.5, 1), limits=c(-0.025, 1.025)) +
theme(legend.position="none", plot.title = element_text(margin=margin(0,0,30,0))) +
labs(y=expression(atop("Inverse decision bias","p(select "*S[chosen]*"+) - p(select "*S[unchosen]*"+)")),
x="Pairs memory (accuracy)",
title="Between participants") +
geom_text(data=memory_model_text, mapping=aes(x=x, y=y, label=text), size=8)
# dummy grouping column so position_jitterdodge below has a group to dodge by
memory_per_deliberation_subs <- mutate(memory_per_deliberation_subs, condition=NaN)
# pairs accuracy per deliberation pair
# Panel (b) of Figure 3: bars = group means, points = participants,
# error bars = n_sem standard errors, nudged beside each bar.
# NOTE(review): stat_summary_bin's fun.y argument is deprecated in
# ggplot2 >= 3.3.0 (renamed `fun`); kept as-is to preserve behavior here.
p2 <- ggplot(memory_per_deliberation_subs,
aes(x=pair_type_cond,y=pair_acc)) +
stat_summary_bin(aes(y=pair_acc), fun.y="mean", geom="bar", binwidth=0.2,
position=position_dodge(width=1), fill="grey") +
geom_point(aes(color=factor(condition)), position=position_jitterdodge(dodge.width=1, jitter.width=0.1),
fill="white", shape=21, stroke=point_stroke, size=point_size) +
scale_color_manual(values="black") +
stat_summary(fun.data=mean_se, fun.args = list(mult=n_sem), geom="errorbar", width=0.1, size=1,
position=position_nudge(0.2), color="turquoise4") +
geom_hline(yintercept=0.5, size=line_size, linetype="dashed") +
scale_y_continuous(expand=c(0,0), breaks=c(0,0.5,1), limits=c(0,1.025)) +
theme +
theme(legend.position="none",
aspect.ratio=3/2,
plot.title = element_text(margin=margin(0,0,30,0))) +
labs(x = "Type of deliberation pairs",
y="Pairs memory (accuracy)",
title="Within participants")
# Assemble Figure 3 (a: between participants, b: within participants) and save.
p <- plot_grid(p1,p2,
ncol=2,
axis="bt",
align="v",
labels=c("a","b"),
label_size = 30,
label_fontfamily = "Helvetica",
rel_widths = c(1,0.8))
if (Save_plots == 1) {ggsave(filename=sprintf("../results/Plots/%s.%s","Figure3",fig_type),
plot=p,
width=fig_size[1]+7,
height=fig_size[2]-1)}
Testing whether the results hold when removing explicit strategy participants
# a potential confound of our study is that participants simply inferred that the task design is such that if their choice resulted in a gain, then the unchosen option was probably a loss. At the end of the task we asked participants how they made their choices in unchosen pairs. We now tag the participants who argued for this inference and remove them from analysis to see whether the results replicate even without them.
strategy <- clean_data_Exp1$strategies
# These are the people who explicitly mentioned an inverse strategy
inverse_strategy <- c("1Yu0G", "3PT1e", "6PhdR", "8mjxB", "9Ai4r", "a5b8U", "ADPTy", "aIvIN", "bI48S", "DAt9p", "diiDU", "fl6Ph", "H3ROo", "JeouF", "mEVNr", "n8kIj", "nYFCE", "OAsQd", "oJGUI", "uzP2b", "xvwYZ", "z9lUP")
Inverse decision bias
# =====================
# Inverse decision bias
# =====================
# Final Decisions summary split by reported strategy: participants who
# explicitly described the inverse task-structure inference vs. the rest.
Exp1_FD_no_strategy <- clean_data_Exp1$final_decisions %>%
mutate(inverse_strategy = ifelse(PID %in% inverse_strategy, 1, 0))
Exp1_FD_no_strategy %>%
mutate(choice = ifelse(chosen_trial==1, "Chosen", "Unchosen")) %>%
group_by(PID, inverse_strategy, choice) %>%
dplyr::summarize(p_gain = mean(higher_outcome_chosen, na.rm=1)) %>%
group_by(inverse_strategy, choice) %>%
dplyr::summarize(mean = mean(p_gain, na.rm=1),
se = sd(p_gain, na.rm=1)/sqrt(n()),
n = n(),
n_effect = sum(p_gain<0.5),
percent = round(sum(p_gain<0.5)/n()*100)) %>%
mutate(`p(select rewarded)` = sprintf("%.2f \u00b1 %.2f", mean, se),
n_effect = ifelse(choice=="Chosen", n-n_effect, n_effect),
`n effect` = sprintf("%d/%d", n_effect, n),
`% effect` = ifelse(choice=="Chosen", 100-percent, percent),
`reported strategy` = ifelse(inverse_strategy==1, "Inverse strategy","No inverse strategy")) %>%
ungroup(inverse_strategy) %>%
dplyr::select(`reported strategy`, choice, `p(select rewarded)`, `n effect`, `% effect`) %>%
kbl() %>%
kable_paper("hover", full_width = F, html_font = "Helvetica", position = "left")
|
reported strategy
|
choice
|
p(select rewarded)
|
n effect
|
% effect
|
|
No inverse strategy
|
Chosen
|
0.92 ± 0.01
|
213/213
|
100
|
|
No inverse strategy
|
Unchosen
|
0.45 ± 0.01
|
131/213
|
62
|
|
Inverse strategy
|
Chosen
|
0.97 ± 0.01
|
22/22
|
100
|
|
Inverse strategy
|
Unchosen
|
0.24 ± 0.04
|
20/22
|
91
|
if (run_models==1){
# run model and use function to rearrange the coefficients
# NOTE(review): unlike sibling chunks, this branch does not save() its result,
# so re-running will not refresh the file loaded below -- confirm intended.
M_Exp1_choice_ratings_no_strategy <- run_choice_ratings_model(
subset(Exp1_FD_no_strategy, !is.na(left_chosen) & inverse_strategy==0),c(),params,"Exp1")
coef_list_Exp1_no_strategy <- create_choice_ratings_coef_list(M_Exp1_choice_ratings_no_strategy, "Exp1", "Pairs")
} else {
load("../data/Models/Strategy_analysis/coef_list_Exp1_no_strategy.RData")
}
# Display group-level coefficients; * marks 95% HDIs excluding zero.
coef_list_Exp1_no_strategy$summary_group_coefs %>%
mutate(sig = ifelse((Median>0 & low95HDI>0 & high95HDI>0) |
(Median<0 & low95HDI<0 & high95HDI<0),"*",""),
value = sprintf("%.2f [%.2f, %.2f]%s",Median, low95HDI, high95HDI, sig)) %>%
dplyr::select(coef, value) %>%
kbl() %>%
kable_paper("hover", full_width = F, html_font = "Helvetica", position = "left")
|
coef
|
value
|
|
(Intercept)
|
1.51 [1.38, 1.65]*
|
|
chosen_trial_centered
|
1.81 [1.67, 1.96]*
|
|
norm_drate_by_outcome
|
0.31 [0.17, 0.44]*
|
|
chosen_trial_centered:norm_drate_by_outcome
|
-0.03 [-0.16, 0.11]
|
|
Intercept_Chosen_Pairs
|
3.31 [3.09, 3.57]*
|
|
Slope_Chosen_Pairs
|
0.28 [0.07, 0.50]*
|
|
Intercept_Unchosen_Pairs
|
-0.30 [-0.44, -0.16]*
|
|
Slope_Unchosen_Pairs
|
0.33 [0.16, 0.50]*
|
Outcome Estimation
# ==================
# Outcome Estimation
# ==================
# Outcome estimations split by reported strategy: mean probability of
# estimating items as rewarded, per choice type and true outcome. n_effect is
# flipped for the cells where the "effect" direction is estimating correctly.
outcome_estimation %>%
mutate(inverse_strategy = ifelse(PID %in% inverse_strategy, 1, 0)) %>%
group_by(inverse_strategy, choice, reward) %>%
dplyr::summarise(mean = mean(gain_eval, na.rm=1),
se = sd(gain_eval, na.rm=1)/sqrt(n()),
n = n(),
n_effect = sum(gain_eval<0.5),
percent = round(sum(gain_eval<0.5)/n()*100,1)) %>%
mutate(`p(estimate as rewarded)` = sprintf("%.2f \u00b1 %.2f",mean, se),
n_effect = ifelse((reward=="Rewarded" & choice=="Chosen") | (reward=="Unrewarded" & choice=="Unchosen"),
n-n_effect, n_effect),
`n effect` = sprintf("%d/%d", n_effect, n),
`% effect` = sprintf("%.f",n_effect/n*100),
`reported strategy` = ifelse(inverse_strategy==1, "Inverse strategy","No inverse strategy")) %>%
ungroup(inverse_strategy) %>%
dplyr::select(`reported strategy`,choice,reward,`p(estimate as rewarded)`,`n effect`,`% effect`) %>%
kbl() %>%
kable_paper("hover", full_width = F, html_font = "Helvetica", position = "left")
|
reported strategy
|
choice
|
reward
|
p(estimate as rewarded)
|
n effect
|
% effect
|
|
No inverse strategy
|
Chosen
|
Rewarded
|
0.94 ± 0.01
|
211/213
|
99
|
|
No inverse strategy
|
Chosen
|
Unrewarded
|
0.14 ± 0.02
|
187/213
|
88
|
|
No inverse strategy
|
Unchosen
|
Rewarded
|
0.40 ± 0.02
|
124/213
|
58
|
|
No inverse strategy
|
Unchosen
|
Unrewarded
|
0.51 ± 0.02
|
135/213
|
63
|
|
Inverse strategy
|
Chosen
|
Rewarded
|
0.98 ± 0.01
|
22/22
|
100
|
|
Inverse strategy
|
Chosen
|
Unrewarded
|
0.06 ± 0.03
|
21/22
|
95
|
|
Inverse strategy
|
Unchosen
|
Rewarded
|
0.28 ± 0.06
|
15/22
|
68
|
|
Inverse strategy
|
Unchosen
|
Unrewarded
|
0.83 ± 0.05
|
20/22
|
91
|
# ===============================================
# Model estimations as a function of outcome type
# ===============================================
# Multilevel logistic regression of gain estimations on choice (chosen vs.
# unchosen object, centered) by reward type, excluding inverse-strategy
# participants; refit or load the saved model.
if (run_models==1){
clean_data_Exp1$outcome_evaluation <- mutate(clean_data_Exp1$outcome_evaluation,
chosen_obj_centered = ifelse(chosen_obj==0,-1,1),
inverse_strategy = ifelse(PID %in% inverse_strategy, 1, 0))
M_outcome_estimation_no_strategy <- stan_glmer(data = subset(clean_data_Exp1$outcome_evaluation,
inverse_strategy==0),
outcome_eval_gain ~ chosen_obj_centered * reward_type +
(chosen_obj_centered * reward_type | PID),
family = binomial(link="logit"),
adapt_delta = params$adapt_delta,
iter = params$iterations,
chains = params$chains,
warmup = params$warmup)
save(M_outcome_estimation_no_strategy, file = "../data/Models/Strategy_analysis/M_outcome_estimation_no_strategy.RData")
} else {
load("../data/Models/Strategy_analysis/M_outcome_estimation_no_strategy.RData")
}
# Rearrange model coefficients to get coefficients of interest
# Cell-wise estimates (e.g. gain_chosen) are linear combinations of the
# centered fixed effects; random-effect (PID) columns are dropped first.
model_posterior <- as.data.frame(M_outcome_estimation_no_strategy)
model_posterior[,!str_detect(colnames(model_posterior),"PID")] %>%
mutate(gain_chosen = `(Intercept)` + chosen_obj_centered + reward_type + `chosen_obj_centered:reward_type`,
no_gain_chosen = `(Intercept)` + chosen_obj_centered - reward_type - `chosen_obj_centered:reward_type`,
gain_unchosen = `(Intercept)` - chosen_obj_centered + reward_type - `chosen_obj_centered:reward_type`,
no_gain_unchosen = `(Intercept)` - chosen_obj_centered - reward_type + `chosen_obj_centered:reward_type`) %>%
gather(coef,value) %>%
group_by(coef) %>%
dplyr::summarize(HDI95_low = posterior_interval(as.matrix(value), prob=0.95)[1],
HDI95_high = posterior_interval(as.matrix(value), prob=0.95)[2],
median = median(value)) %>%
mutate(value = sprintf("%.2f [%.2f, %.2f]",median, HDI95_low, HDI95_high)) %>%
dplyr::select(coef, value) %>%
kbl() %>%
kable_paper("hover", full_width = F, html_font = "Helvetica", position = "left")
|
coef
|
value
|
|
(Intercept)
|
0.16 [0.01, 0.33]
|
|
chosen_obj_centered
|
0.38 [0.24, 0.53]
|
|
chosen_obj_centered:reward_type
|
1.69 [1.51, 1.90]
|
|
gain_chosen
|
3.63 [3.18, 4.20]
|
|
gain_unchosen
|
-0.50 [-0.69, -0.32]
|
|
no_gain_chosen
|
-2.55 [-2.95, -2.20]
|
|
no_gain_unchosen
|
0.07 [-0.13, 0.28]
|
|
reward_type
|
1.40 [1.23, 1.60]
|
Relationship between inverse decision bias and inverse estimation of value
# ==========================================================================
# Relationship between inverse decision bias and inverse estimation of value
# ==========================================================================
# Per participant and choice type: difference in gain estimations between
# rewarded and unrewarded items (reward_diff), merged with the decision bias
# computed earlier (p_gain), for non-strategy participants only.
outcome_estimation <- outcome_estimation %>%
mutate(inverse_strategy = ifelse(PID %in% inverse_strategy, 1, 0))
inverse_decision_estimation_no_strategy <- subset(outcome_estimation,inverse_strategy==0) %>%
select(-c(eval_acc, eval_rt)) %>%
spread(reward, gain_eval) %>%
mutate(reward_diff = Rewarded-Unrewarded) %>%
merge(p_gain, by=c("PID","choice")) %>%
mutate(choice_centered = ifelse(choice=="Chosen", 1, -1),
p_gain_centered = p_gain - 0.5)
# Gaussian regression: centered decision bias ~ choice type x estimation score
if (run_models==1){
M_inverse_decision_estimation_no_strategy <-
stan_glm(data = inverse_decision_estimation_no_strategy,
p_gain_centered ~ choice_centered * reward_diff,
family = gaussian(),
adapt_delta = params$adapt_delta,
iter = params$iterations,
chains = params$chains,
warmup = params$warmup)
save(M_inverse_decision_estimation_no_strategy, file = "../data/Models/Strategy_analysis/M_inverse_decision_estimation_no_strategy.RData")
} else {
load("../data/Models/Strategy_analysis/M_inverse_decision_estimation_no_strategy.RData")
}
# Simple intercepts/slopes per choice type are linear combinations of the
# centered coefficients; display as median [95% posterior interval].
as.data.frame(M_inverse_decision_estimation_no_strategy) %>%
mutate(intercept_chosen = `(Intercept)` + choice_centered,
slope_chosen = reward_diff + `choice_centered:reward_diff`,
intercept_unchosen = `(Intercept)` - choice_centered,
slope_unchosen = reward_diff - `choice_centered:reward_diff`) %>%
gather(coef,value) %>%
group_by(coef) %>%
dplyr::summarize(HDI95_low = posterior_interval(as.matrix(value), prob=0.95)[1],
HDI95_high = posterior_interval(as.matrix(value), prob=0.95)[2],
median = median(value)) %>%
mutate(value = sprintf("%.2f [%.2f, %.2f]",median, HDI95_low, HDI95_high)) %>%
dplyr::select(coef, value) %>%
kbl() %>%
kable_paper("hover", full_width = F, html_font = "Helvetica", position = "left")
|
coef
|
value
|
|
(Intercept)
|
0.08 [0.05, 0.10]
|
|
choice_centered
|
0.09 [0.06, 0.11]
|
|
choice_centered:reward_diff
|
-0.02 [-0.05, 0.02]
|
|
intercept_chosen
|
0.16 [0.12, 0.21]
|
|
intercept_unchosen
|
-0.01 [-0.03, 0.00]
|
|
reward_diff
|
0.33 [0.30, 0.37]
|
|
sigma
|
0.12 [0.11, 0.13]
|
|
slope_chosen
|
0.32 [0.26, 0.37]
|
|
slope_unchosen
|
0.35 [0.31, 0.40]
|
Relationship between inverse decision bias and associative memory - between participants
# ========================================================================================
# Relationship between inverse decision bias and associative memory - between participants
# ========================================================================================
# Model inverse decision bias and pairs memory
# NOTE(review): the run_models branch loads from Memory_Bias/Between_subs while
# the else branch loads from Strategy_analysis -- confirm both files hold the
# same M_Exp1_no_strategy_memory_bias object.
if (run_models==1){
coefs_pair_acc_bias_Exp1_no_strategy <-
run_bias_memory_model(subset(Exp1_FD_no_strategy, inverse_strategy==0),
"pair_acc",c(),params,"Exp1_no_strategy")
load("../data/Models/Memory_Bias/Between_subs/Model_objects/M_Exp1_no_strategy_memory_bias.RData")
} else {
load("../data/Models/Strategy_analysis/M_Exp1_no_strategy_memory_bias.RData")
load("../data/Models/Strategy_analysis/coefs_pair_acc_bias_Exp1_no_strategy.RData")
}
# Present model coefs
as.data.frame(M_Exp1_no_strategy_memory_bias) %>%
gather(coef, value, `(Intercept)`:sigma) %>%
group_by(coef) %>%
dplyr::summarize(HDI95_low = posterior_interval(as.matrix(value), prob=0.95)[1],
HDI95_high = posterior_interval(as.matrix(value), prob=0.95)[2],
median = median(value)) %>%
mutate(value = sprintf("%.2f [%.2f, %.2f]",median, HDI95_low, HDI95_high)) %>%
dplyr::select(coef, value) %>%
kbl() %>%
kable_paper("hover", full_width = F, html_font = "Helvetica", position = "left")
|
coef
|
value
|
|
(Intercept)
|
0.12 [-0.03, 0.26]
|
|
pair_acc
|
0.56 [0.33, 0.78]
|
|
sigma
|
0.22 [0.20, 0.24]
|
Relationship between inverse decision bias and associative memory - within participants
# =======================================================================================
# Relationship between inverse decision bias and associative memory - within participants
# =======================================================================================
# Tag each participant by reported strategy and summarize pair-memory accuracy
# per deliberation-pair group within each strategy group.
# Fix: corrected the displayed label typo "No inverse startegy" -> "No inverse strategy"
# (alphabetical group ordering is unaffected).
memory_per_deliberation_subs_no_strategy <- memory_per_deliberation_subs %>%
mutate(inverse_strategy = ifelse(PID %in% inverse_strategy,"Inverse strategy","No inverse strategy"))
memory_per_deliberation_subs_no_strategy %>%
group_by(inverse_strategy, pair_type_cond) %>%
dplyr::summarize(mean = mean(pair_acc),
se = sd(pair_acc)/sqrt(n())) %>%
mutate(`pair memory accuracy` = sprintf("%.2f \u00b1 %.2f",mean, se)) %>%
dplyr::rename(`condition type` = pair_type_cond,
`reported strategy` = inverse_strategy) %>%
dplyr::select(`reported strategy`, `condition type`, `pair memory accuracy`) %>%
kbl() %>%
kable_paper("hover", full_width = F, html_font = "Helvetica", position = "left")
|
reported strategy
|
condition type
|
pair memory accuracy
|
|
Inverse strategy
|
Direct transfer
|
0.76 ± 0.08
|
|
Inverse strategy
|
Inverse transfer
|
0.86 ± 0.03
|
|
No inverse strategy
|
Direct transfer
|
0.68 ± 0.02
|
|
No inverse strategy
|
Inverse transfer
|
0.74 ± 0.01
|
# Model the effect
# Same within-participants multilevel model as the full sample, refit after
# excluding participants who reported an explicit inverse strategy.
memory_per_deliberation_no_strategy <- memory_per_deliberation %>%
mutate(inverse_strategy = ifelse(PID %in% inverse_strategy,1,0))
if (run_models==1){
M_memory_per_deliberation_Exp1_no_strategy <- stan_glmer(
data = subset(memory_per_deliberation_no_strategy, inverse_strategy==0),
pair_acc ~ pair_type_centered + (pair_type_centered | PID),
family = binomial(link="logit"),
adapt_delta = params$adapt_delta,
iter = params$iterations,
chains = params$chains,
warmup = params$warmup)
save(M_memory_per_deliberation_Exp1_no_strategy,
file = "../data/Models/Strategy_analysis/M_memory_per_deliberation_Exp1_no_strategy.RData")
} else {
load("../data/Models/Strategy_analysis/M_memory_per_deliberation_Exp1_no_strategy.RData")
}
# Present model coefs
# Fixed effects only, displayed as median [95% posterior interval].
as.data.frame(M_memory_per_deliberation_Exp1_no_strategy) %>%
gather(coef, value, `(Intercept)`:pair_type_centered) %>%
group_by(coef) %>%
dplyr::summarize(HDI95_low = posterior_interval(as.matrix(value), prob=0.95)[1],
HDI95_high = posterior_interval(as.matrix(value), prob=0.95)[2],
median = median(value)) %>%
mutate(value = sprintf("%.2f [%.2f, %.2f]",median, HDI95_low, HDI95_high)) %>%
dplyr::select(coef,value) %>%
kbl() %>%
kable_paper("hover", full_width = F, html_font = "Helvetica", position = "left")
|
coef
|
value
|
|
(Intercept)
|
0.96 [0.84, 1.09]
|
|
pair_type_centered
|
0.13 [0.02, 0.23]
|
Figure 4: decision bias and memory correlations across all data sets
# Per-participant decision bias and pair memory across all five data sets.
# condition encodes the experiment-specific manipulation ("None" when absent).
# NOTE(review): the spread() below keys rows on pair_acc and nudge as well as
# the grouping columns -- assumes these are constant per participant within
# Exp/condition (otherwise bias would be NA); confirm upstream.
bias_all_exps <- all_exps_list$final_decisions %>%
mutate(choice = ifelse(chosen_trial==1, "Chosen", "Unchosen")) %>%
mutate(Exp = factor(Exp, levels = c("Pilot", "Exp1", "Exp2", "Exp3", "Exp4")),
condition = ifelse(is.na(cond_logical), "None",
ifelse(cond_logical==1, "1", "0"))) %>%
# condition = ifelse(cond_logical==1 | is.na(cond_logical), "1", "0")) %>%
group_by(Exp, choice, condition, PID) %>%
dplyr::summarise(p_gain = mean(higher_outcome_chosen, na.rm=1),
pair_acc = mean(pair_acc, na.rm=1)) %>%
mutate(nudge = ifelse(Exp %in% c("Pilot", "Exp1"), 0.2, 0.125))
bias_all_exps_spread <- bias_all_exps %>%
spread(choice, p_gain) %>%
mutate(bias = Chosen - Unchosen)
# show decision bias for all exps
# line/fill colors for the three condition levels ("0", "1", "None")
color1 <- "#3FAFAB"; color2 <- "#DFA214"; color3 <- "black"
fillcolor1 <- "#73D2BC"; fillcolor2 <- "#FFCC33"; fillcolor3 <- "#C1C1C1"
# Panel (a) of Figure 4: p(select S+) per choice type and condition, one facet
# per experiment.
# NOTE(review): stat_summary_bin's fun.y argument is deprecated in
# ggplot2 >= 3.3.0 (renamed `fun`); kept as-is to preserve behavior here.
p1 <- ggplot(bias_all_exps, aes(x=choice,y=p_gain,color=condition)) +
stat_summary_bin(aes(y=p_gain, fill=condition), fun.y="mean", color=NA,
geom="bar", binwidth=0.15, position=position_dodge(width=1)) +
geom_point(aes(group=condition), position=position_jitterdodge(dodge.width=1, jitter.width=0.1, jitter.height=0),
fill="white", shape=21, stroke=0.4, size=2) +
scale_color_manual(values=c(color1, color2, color3)) +
scale_fill_manual(values=c(fillcolor1, fillcolor2, fillcolor3)) +
# error bars nudged sideways by the per-experiment `nudge` column
stat_summary(aes(group=condition, x=as.numeric(as.factor(choice))+nudge),
fun.data=mean_se, fun.args = list(mult=n_sem),
geom="errorbar", width=0.15, size=0.7, position=position_dodge(width=1)) +
geom_hline(yintercept=0.5, size=linesize, linetype="dashed") +
scale_y_continuous(expand=c(0,0), breaks=c(0,0.5,1), limits=c(0,1.05)) +
theme +
theme(legend.position = "none",
axis.title.x=element_blank(),
aspect.ratio=3/2,
strip.background = element_rect(colour=NA, fill=NA),
panel.spacing = unit(4, "lines"),
plot.title = element_text(margin=margin(0,0,30,0), hjust = 0.5, size = 30),
text = element_text(size=26,family="Helvetica"),
axis.title = element_text(size = 24),
axis.text = element_text(size = 22, color = "black")) +
scale_x_discrete(breaks = c("Chosen","Unchosen"),
labels = c("Chosen" = expression(S[chosen]),
"Unchosen" = expression(S[unchosen]))) +
labs(y="p(select S+)", title="Inverse decision bias") +
facet_wrap(.~Exp,
ncol=5,
labeller = labeller(Exp = c(Pilot="Pilot\n", Exp1="Experiment 1\n", Exp2="Experiment 2\n",
Exp3="Experiment 3\n", Exp4="Experiment 4\n")))
# show memory vs. inverse bias
# load regression coefficients or run the model
# One bias ~ memory regression per data set; Exp2-4 include their
# experiment-specific condition regressor.
if (run_models==1){
coefs_pair_acc_bias_Pilot <- run_bias_memory_model(subset(clean_data_Pilot$final_decisions, !is.na(left_chosen)),
"pair_acc",c(),params,"Pilot")
coefs_pair_acc_bias_Exp1 <- run_bias_memory_model(subset(clean_data_Exp1$final_decisions, !is.na(left_chosen)),
"pair_acc",c(),params,"Exp1")
coefs_pair_acc_bias_Exp2 <- run_bias_memory_model(subset(clean_data_Exp2$final_decisions, !is.na(left_chosen)),
"pair_acc","repeat_cond_centered",params,"Exp2")
coefs_pair_acc_bias_Exp3 <- run_bias_memory_model(subset(clean_data_Exp3$final_decisions, !is.na(left_chosen)),
"pair_acc","reward_cond",params,"Exp3")
coefs_pair_acc_bias_Exp4 <- run_bias_memory_model(subset(clean_data_Exp4$final_decisions,!is.na(left_chosen)),
"pair_acc","same_painter_centered",params,"Exp4")
} else {
load("../data/Models/Memory_Bias/Between_subs/Coef_lists/coefs_pair_acc_bias_Pilot.RData")
load("../data/Models/Memory_Bias/Between_subs/Coef_lists/coefs_pair_acc_bias_Exp1.RData")
load("../data/Models/Memory_Bias/Between_subs/Coef_lists/coefs_pair_acc_bias_Exp2.RData")
load("../data/Models/Memory_Bias/Between_subs/Coef_lists/coefs_pair_acc_bias_Exp3.RData")
load("../data/Models/Memory_Bias/Between_subs/Coef_lists/coefs_pair_acc_bias_Exp4.RData")
}
# Bring the per-experiment predicted regression fits into a common format.
# coef_list: output of run_bias_memory_model(); its predicted_summary_list
#   holds one data frame (no condition) or two (one per condition level,
#   detected here by the extra column, i.e. more than 5 columns).
# Exp_name: experiment label to prepend as an "Exp" column.
# Returns one data frame of predicted fits tagged with the experiment name.
arrange_predicted_fits <- function(coef_list, Exp_name){
  if (ncol(coef_list$predicted_summary_list[[1]]) > 5){
    # two condition levels: stack them and give the condition column a common name
    pred_summary <- bind_rows(coef_list$predicted_summary_list[[1]], coef_list$predicted_summary_list[[2]])
    colnames(pred_summary)[5] <- "condition"
  } else {
    pred_summary <- coef_list$predicted_summary_list[[1]]
  }
  # prepend the experiment label, repeated once per predicted row
  # (the original rep(Exp_name, 1, nrow(...)) relied on the positional
  # length.out argument; rep(x, times) is the clear equivalent)
  pred_summary <- bind_cols(data.frame(Exp = rep(Exp_name, nrow(pred_summary))), pred_summary)
  return(pred_summary)
}
# Stack predicted fits from all experiments; recode condition for plotting and
# fix the experiment factor order for faceting.
memory_predicted_fits <- bind_rows(arrange_predicted_fits(coefs_pair_acc_bias_Pilot, "Pilot"),
arrange_predicted_fits(coefs_pair_acc_bias_Exp1, "Exp1"),
arrange_predicted_fits(coefs_pair_acc_bias_Exp2, "Exp2"),
arrange_predicted_fits(coefs_pair_acc_bias_Exp3, "Exp3"),
arrange_predicted_fits(coefs_pair_acc_bias_Exp4, "Exp4")) %>%
mutate(condition = ifelse(is.na(condition), "None", ifelse(condition==1, "1", "0")),
Exp = factor(Exp, levels = c("Pilot", "Exp1", "Exp2", "Exp3", "Exp4")))
# Extract the posterior memory-slope draws per condition for one experiment.
# NOTE(review): columns are addressed by position -- assumes run_bias_memory_model
# puts the memory slope in column 2 and the memory-by-condition interaction in
# column 4 of posterior_draws; confirm against that function's output.
arrange_memory_group_fits <- function(coef_list, Exp_name){
post_draws <- coef_list$posterior_draws
if (ncol(post_draws) > 3) { # for Experiments 2-4
post_draws <- post_draws %>%
mutate(`1` = post_draws[,2] + post_draws[,4],
`0` = post_draws[,2] - post_draws[,4],
Experiment = Exp_name) %>%
dplyr::select(Experiment,`1`,`0`) %>%
gather(condition, value, `1`:`0`)
} else {
post_draws <- post_draws %>%
dplyr::rename(None = pair_acc) %>%
mutate(Experiment = Exp_name) %>%
dplyr::select(Experiment, None) %>%
gather(condition, value, None)
}
return(post_draws)
}
# Summarize slope draws per experiment and condition as median [95% CI]
# annotation text (with * when the interval excludes zero) plus plot positions.
memory_group_fits_text <- bind_rows(arrange_memory_group_fits(coefs_pair_acc_bias_Pilot, "Pilot"),
arrange_memory_group_fits(coefs_pair_acc_bias_Exp1, "Exp1"),
arrange_memory_group_fits(coefs_pair_acc_bias_Exp2, "Exp2"),
arrange_memory_group_fits(coefs_pair_acc_bias_Exp3, "Exp3"),
arrange_memory_group_fits(coefs_pair_acc_bias_Exp4, "Exp4")) %>%
group_by(Experiment, condition) %>%
dplyr::summarize(Median=median(value), low95=quantile(value, 0.025), high95=quantile(value, 0.975)) %>%
mutate(sig = ifelse((low95>0 & high95>0) | (low95<0 & high95<0),"*",""),
text = sprintf("%.2f [%.2f, %.2f]%s",Median, low95, high95, sig),
x = 0.45,
y = ifelse(condition=="0", -0.5, -0.75)) %>%
mutate(Exp = factor(Experiment, levels = c("Pilot", "Exp1", "Exp2", "Exp3", "Exp4"))) %>%
arrange(Exp)
# Panel (b) of Figure 4: inverse decision bias vs. pair memory with the
# model-predicted fit and posterior slope annotation, one facet per experiment.
p2 <- ggplot(bias_all_exps_spread, aes(y=bias, x=pair_acc)) +
geom_hline(yintercept=0, size=line_size, linetype="dashed") +
geom_vline(xintercept=0.5, size=line_size, linetype="dashed") +
geom_point(size=point_size-2.5, shape=21, fill="white", stroke=point_stroke, aes(color=condition)) +
theme + point_plot_theme +
geom_ribbon(data = mutate(memory_predicted_fits, bias=median),
aes(ymin=lower, ymax=upper, fill=condition)) +
geom_line(aes(y=median,color=condition), data=memory_predicted_fits, size=line_size*1.5) +
scale_color_manual(values=c(color1, color2, color3)) +
scale_fill_manual(values=c(fillcolor1, fillcolor2, fillcolor3)) +
scale_y_continuous(expand=c(0,0), breaks=c(-1,0,1), limits=c(-1.025,1.025)) +
scale_x_continuous(expand=c(0,0), breaks=c(0, 0.5, 1), limits=c(-0.025, 1.025)) +
theme(legend.position = "none",
plot.title = element_text(margin=margin(0,0,30,0), hjust = 0.5, size = 28),
text = element_text(size=26,family="Helvetica"),
axis.title = element_text(size = 24),
axis.text = element_text(size = 20, color = "black")) +
labs(y="Inverse decision bias",
x="Pairs memory",
title="Associative memory and inverse decision bias") +
geom_text(data=memory_group_fits_text, mapping=aes(x=x, y=y, label=text, color=condition),size=7) +
facet_wrap(.~Exp,
ncol=5,
labeller = labeller(Exp = c(Pilot="Pilot\n", Exp1="Experiment 1\n", Exp2="Experiment 2\n",
Exp3="Experiment 3\n", Exp4="Experiment 4\n")))
# Assemble Figure 4 (a: bias, b: memory correlation) and optionally save it.
p <- plot_grid(p1, p2,
nrow=2,
axis="bt",
labels=c("a","b"),
label_size = 30,
label_fontfamily = "Helvetica")
if (Save_plots == 1) {ggsave(filename=sprintf("../results/Plots/%s.%s","Figure4",fig_type),
plot=p,
width=fig_size[1]+10,
height=fig_size[2]+2)}
Power analysis for Experiment 1 based on Pilot study
library("pwr")
library("lme4")
library("lsr")
# ========================================================
# run simple logistic regressions to predict decision bias
# ========================================================
# Power-analysis input: fit a separate fixed-effects logistic regression per
# pilot participant (rewarded item selected ~ choice type x rating difference)
# and derive each participant's "unchosen intercept" (bias in unchosen pairs).
final_decisions_pilot <- subset(clean_data_Pilot$final_decisions, !is.na(left_chosen))
# run logistic fits for each subject and detect their unchosen intercept
subs <- unique(final_decisions_pilot$PID)
subs_coefs <- data.frame()
# seq_along() is safe when subs is empty (1:length(subs) would give c(1, 0))
for (i in seq_along(subs)){
sub_data <- subset(final_decisions_pilot, PID == subs[i])
m_sub <- glm(data = sub_data,
higher_outcome_chosen ~ chosen_trial_centered * norm_drate_by_outcome,
family = binomial(link = "logit"))
subs_coefs[i,1] <- subs[i]
subs_coefs[i,c(2:5)] <- m_sub$coefficients
}
# name columns after the coefficients of the last fit (same formula for all subjects)
colnames(subs_coefs) <- c("PID", names(m_sub$coefficients))
# compute unchosen intercept (intercept coef - chosen_trial coef)
subs_coefs <- mutate(subs_coefs, unchosen_intercept = `(Intercept)` - chosen_trial_centered)
# compute power
# Paired t-test of `coef` (a column name in `data`) against a constant null
# value, plus Cohen's d (lsr::cohensD), achieved power at the observed N, and
# the N required to reach `desired_power` (pwr::pwr.t.test).
# Returns a one-row data frame of these statistics.
compute_power <- function(desired_power,desired_sig_level,data,coef,null_point){
t_stats <- data %>%
dplyr::summarize(
t_value = as.numeric(t.test(!!sym(coef), rep(null_point, n()), paired=TRUE)["statistic"]),
p_value = as.numeric(t.test(!!sym(coef), rep(null_point, n()), paired=TRUE)["p.value"]),
cohens_d = cohensD(x=!!sym(coef),y=rep(null_point, n()),method="paired"),
power = as.numeric(pwr.t.test(n = n(), d = cohens_d, sig.level = desired_sig_level,
type = c("paired"))["power"]),
desired_sample = as.numeric(pwr.t.test(power = desired_power, d = cohens_d,
sig.level = desired_sig_level, type = c("paired"))["n"]))
return(t_stats)
}
# Power for detecting a non-zero unchosen intercept in the pilot sample
power_pilot <- compute_power(0.99, 0.05, subs_coefs, "unchosen_intercept",0)
colnames(power_pilot) <- c("t value", "p value", "Cohen's d", "power in pilot study", "desired sample to get 99% power")
power_pilot %>%
kbl() %>%
kable_paper("hover", full_width = F, html_font = "Helvetica", position = "left")
|
t value
|
p value
|
Cohen’s d
|
power in pilot study
|
desired sample to get 99% power
|
|
-3.714987
|
0.0003484
|
0.3852262
|
0.9569163
|
125.746
|
Consistency in Deliberation phase
# Choice consistency in the Deliberation phase.
# choice_consistency counts how often the same option was chosen before the
# final block; adding 1 folds the final block's own choice into the count, so
# the measure becomes "number of times the decision was repeated".
clean_data_Exp1$deliberation %>%
  mutate(choice_consistency = choice_consistency + 1) %>%
  group_by(PID) %>%
  dplyr::summarise(cons_mean = mean(choice_consistency, na.rm = TRUE)) %>%
  dplyr::summarise(mean = mean(cons_mean, na.rm = TRUE),
                   sd = sd(cons_mean, na.rm = TRUE),
                   se = sd / sqrt(n()))
## # A tibble: 1 x 3
## mean sd se
## <dbl> <dbl> <dbl>
## 1 2.87 0.161 0.0105